#include <asm/msr.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
+#include <asm/hvm/svm/svm.h>
#include <asm/hvm/svm/vmcb.h>
#include <asm/hvm/svm/emulate.h>
+
extern int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip,
int inst_len);
#define sib operand [1]
-unsigned long get_effective_addr_modrm64(struct vmcb_struct *vmcb,
- struct cpu_user_regs *regs, const u8 prefix, int inst_len,
- const u8 *operand, u8 *size)
+unsigned long get_effective_addr_modrm64(struct cpu_user_regs *regs,
+ const u8 prefix, int inst_len,
+ const u8 *operand, u8 *size)
{
unsigned long effective_addr = (unsigned long) -1;
u8 length, modrm_mod, modrm_rm;
u32 disp = 0;
+ struct vcpu *v = current;
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
HVM_DBG_LOG(DBG_LEVEL_1, "get_effective_addr_modrm64(): prefix = %x, "
"length = %d, operand[0,1] = %x %x.\n", prefix, *size, operand [0],
#if __x86_64__
/* 64-bit mode */
- if (vmcb->cs.attr.fields.l && (vmcb->efer & EFER_LMA))
+ if (vmcb->cs.attr.fields.l && svm_long_mode_enabled(v))
return vmcb->rip + inst_len + *size + disp;
#endif
return disp;
}
-unsigned long svm_rip2pointer(struct vmcb_struct *vmcb)
+unsigned long svm_rip2pointer(struct vcpu *v)
{
/*
* The following is subtle. Intuitively this code would be something like:
* %cs is update, but fortunately, base contain the valid base address
* no matter what kind of addressing is used.
*/
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
unsigned long p = vmcb->cs.base + vmcb->rip;
- if (!(vmcb->cs.attr.fields.l && vmcb->efer & EFER_LMA))
+ if (!(vmcb->cs.attr.fields.l && svm_long_mode_enabled(v)))
return (u32)p; /* mask to 32 bits */
/* NB. Should mask to 16 bits if in real mode or 16-bit protected mode. */
return p;
* The caller can either pass a NULL pointer to the guest_eip_buf, or a pointer
* to enough bytes to satisfy the instruction including prefix bytes.
*/
-int __get_instruction_length_from_list(struct vmcb_struct *vmcb,
+int __get_instruction_length_from_list(struct vcpu *v,
enum instruction_index *list, unsigned int list_count,
u8 *guest_eip_buf, enum instruction_index *match)
{
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
unsigned int inst_len = 0;
unsigned int i;
unsigned int j;
}
else
{
- inst_copy_from_guest(buffer, svm_rip2pointer(vmcb), MAX_INST_LEN);
+ inst_copy_from_guest(buffer, svm_rip2pointer(v), MAX_INST_LEN);
buf = buffer;
}
}
}
-static int svm_lme_is_set(struct vcpu *v)
-{
- u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
- return guest_efer & EFER_LME;
-}
-
-static int svm_cr4_pae_is_set(struct vcpu *v)
-{
- unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
- return guest_cr4 & X86_CR4_PAE;
-}
-
-static int svm_paging_enabled(struct vcpu *v)
-{
- unsigned long guest_cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
- return (guest_cr0 & X86_CR0_PE) && (guest_cr0 & X86_CR0_PG);
-}
-
-static int svm_pae_enabled(struct vcpu *v)
-{
- unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
- return svm_paging_enabled(v) && (guest_cr4 & X86_CR4_PAE);
-}
-
-static int svm_long_mode_enabled(struct vcpu *v)
-{
- u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
- return guest_efer & EFER_LMA;
-}
static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
{
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- if ( (vmcb->efer & EFER_LMA) && vmcb->cs.attr.fields.l )
+ if ( svm_long_mode_enabled(v) && vmcb->cs.attr.fields.l )
return 8;
if ( svm_realmode(v) )
int long_mode = 0;
#ifdef __x86_64__
- long_mode = vmcb->cs.attr.fields.l && (vmcb->efer & EFER_LMA);
+ long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
#endif
switch ( seg )
{
printk("Huh? We got a GP Fault with an invalid IDTR!\n");
svm_dump_vmcb(__func__, vmcb);
svm_dump_regs(__func__, regs);
- svm_dump_inst(svm_rip2pointer(vmcb));
+ svm_dump_inst(svm_rip2pointer(v));
domain_crash(v->domain);
return;
}
HVMTRACE_3D(CPUID, v, input,
((uint64_t)eax << 32) | ebx, ((uint64_t)ecx << 32) | edx);
- inst_len = __get_instruction_length(vmcb, INSTR_CPUID, NULL);
+ inst_len = __get_instruction_length(v, INSTR_CPUID, NULL);
ASSERT(inst_len > 0);
__update_guest_eip(vmcb, inst_len);
}
}
-static void svm_get_prefix_info(
- struct vmcb_struct *vmcb,
- unsigned int dir, svm_segment_register_t **seg, unsigned int *asize)
+static void svm_get_prefix_info(struct vcpu *v, unsigned int dir,
+ svm_segment_register_t **seg,
+ unsigned int *asize)
{
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
unsigned char inst[MAX_INST_LEN];
int i;
memset(inst, 0, MAX_INST_LEN);
- if (inst_copy_from_guest(inst, svm_rip2pointer(vmcb), sizeof(inst))
+ if (inst_copy_from_guest(inst, svm_rip2pointer(v), sizeof(inst))
!= MAX_INST_LEN)
{
gdprintk(XENLOG_ERR, "get guest instruction failed\n");
#ifdef __x86_64__
/* If we're in long mode, we shouldn't check the segment presence & limit */
- long_mode = vmcb->cs.attr.fields.l && vmcb->efer & EFER_LMA;
+ long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
#endif
/* d field of cs.attr is 1 for 32-bit, 0 for 16 or 64 bit.
isize --;
if (isize > 1)
- svm_get_prefix_info(vmcb, info.fields.type, &seg, &asize);
+ svm_get_prefix_info(v, info.fields.type, &seg, &asize);
if (info.fields.type == IOREQ_WRITE)
{
}
-static inline int svm_pgbit_test(struct vcpu *v)
-{
- return v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PG;
-}
-
-
/*
* Write to control registers
*/
ASSERT(vmcb);
- inst_copy_from_guest(buffer, svm_rip2pointer(vmcb), sizeof(buffer));
+ inst_copy_from_guest(buffer, svm_rip2pointer(v), sizeof(buffer));
/* get index to first actual instruction byte - as we will need to know
where the prefix lives later on */
if ( type == TYPE_MOV_TO_CR )
{
inst_len = __get_instruction_length_from_list(
- vmcb, list_a, ARR_SIZE(list_a), &buffer[index], &match);
+ v, list_a, ARR_SIZE(list_a), &buffer[index], &match);
}
else /* type == TYPE_MOV_FROM_CR */
{
inst_len = __get_instruction_length_from_list(
- vmcb, list_b, ARR_SIZE(list_b), &buffer[index], &match);
+ v, list_b, ARR_SIZE(list_b), &buffer[index], &match);
}
ASSERT(inst_len > 0);
case INSTR_LMSW:
if (svm_dbg_on)
- svm_dump_inst(svm_rip2pointer(vmcb));
+ svm_dump_inst(svm_rip2pointer(v));
gpreg = decode_src_reg(prefix, buffer[index+2]);
value = get_reg(gpreg, regs, vmcb) & 0xF;
case INSTR_SMSW:
if (svm_dbg_on)
- svm_dump_inst(svm_rip2pointer(vmcb));
+ svm_dump_inst(svm_rip2pointer(v));
value = v->arch.hvm_svm.cpu_shadow_cr0;
gpreg = decode_src_reg(prefix, buffer[index+2]);
set_reg(gpreg, value, regs, vmcb);
HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
- inst_len = __get_instruction_length(vmcb, INSTR_RDMSR, NULL);
+ inst_len = __get_instruction_length(v, INSTR_RDMSR, NULL);
}
else
{
break;
}
- inst_len = __get_instruction_length(vmcb, INSTR_WRMSR, NULL);
+ inst_len = __get_instruction_length(v, INSTR_WRMSR, NULL);
}
__update_guest_eip(vmcb, inst_len);
}
-static void svm_vmexit_do_invd(struct vmcb_struct *vmcb)
+/*
+ * Handle a VMEXIT raised by the guest executing INVD.  The actual cache
+ * invalidation is deliberately NOT performed (doing it on behalf of a
+ * guest is unsafe); we log the intercept and advance the guest RIP past
+ * the instruction.  Takes the vcpu (rather than the raw VMCB) to match
+ * the other refactored handlers in this patch.
+ */
+static void svm_vmexit_do_invd(struct vcpu *v)
{
+    /* Derive the VMCB locally now that callers pass the vcpu. */
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    int inst_len;
    /* Invalidate the cache - we can't really do that safely - maybe we should
     */
    printk("INVD instruction intercepted - ignored\n");
-    inst_len = __get_instruction_length(vmcb, INSTR_INVD, NULL);
+    inst_len = __get_instruction_length(v, INSTR_INVD, NULL);
    __update_guest_eip(vmcb, inst_len);
}
* Unknown how many bytes the invlpg instruction will take. Use the
* maximum instruction length here
*/
- if (inst_copy_from_guest(opcode, svm_rip2pointer(vmcb), length) < length)
+ if (inst_copy_from_guest(opcode, svm_rip2pointer(v), length) < length)
{
gdprintk(XENLOG_ERR, "Error reading memory %d bytes\n", length);
domain_crash(v->domain);
if (invlpga)
{
- inst_len = __get_instruction_length(vmcb, INSTR_INVLPGA, opcode);
+ inst_len = __get_instruction_length(v, INSTR_INVLPGA, opcode);
ASSERT(inst_len > 0);
__update_guest_eip(vmcb, inst_len);
{
/* What about multiple prefix codes? */
prefix = (is_prefix(opcode[0])?opcode[0]:0);
- inst_len = __get_instruction_length(vmcb, INSTR_INVLPG, opcode);
+ inst_len = __get_instruction_length(v, INSTR_INVLPG, opcode);
ASSERT(inst_len > 0);
inst_len--;
* displacement to get effective address and length in bytes. Assume
* the system in either 32- or 64-bit mode.
*/
- g_vaddr = get_effective_addr_modrm64(vmcb, regs, prefix, inst_len,
+ g_vaddr = get_effective_addr_modrm64(regs, prefix, inst_len,
&opcode[inst_len], &length);
inst_len += length;
ASSERT(vmcb);
ASSERT(regs);
- inst_len = __get_instruction_length(vmcb, INSTR_VMCALL, NULL);
+ inst_len = __get_instruction_length(v, INSTR_VMCALL, NULL);
ASSERT(inst_len > 0);
HVMTRACE_1D(VMMCALL, v, regs->eax);
svm_dump_vmcb(__func__, vmcb);
svm_dump_regs(__func__, regs);
- svm_dump_inst(svm_rip2pointer(vmcb));
+ svm_dump_inst(svm_rip2pointer(v));
}
#if defined(__i386__)
/* Debug info to hopefully help debug WHY the guest double-faulted. */
svm_dump_vmcb(__func__, vmcb);
svm_dump_regs(__func__, regs);
- svm_dump_inst(svm_rip2pointer(vmcb));
+ svm_dump_inst(svm_rip2pointer(v));
svm_inject_exception(v, TRAP_double_fault, 1, 0);
break;
break;
case VMEXIT_INVD:
- svm_vmexit_do_invd(vmcb);
+ svm_vmexit_do_invd(v);
break;
case VMEXIT_GDTR_WRITE: